In [1]:
import pandas as pd
import copy
import numpy as np
from pulp import *
import math
# import io

Counties

In [2]:
df_counties = pd.read_csv("https://www2.census.gov/geo/docs/reference/cenpop2020/county/CenPop2020_Mean_CO47.txt")
df_counties.head()
Out[2]:
STATEFP COUNTYFP COUNAME STNAME POPULATION LATITUDE LONGITUDE
0 47 1 Anderson Tennessee 77123 36.081238 -84.178765
1 47 3 Bedford Tennessee 50237 35.512233 -86.465018
2 47 5 Benton Tennessee 15864 36.070898 -88.090044
3 47 7 Bledsoe Tennessee 14913 35.621511 -85.200314
4 47 9 Blount Tennessee 135280 35.753197 -83.973461
In [3]:
from sklearn.neighbors import DistanceMetric
from math import radians

df_counties['lat'] = np.radians(df_counties['LATITUDE'])
df_counties['lon'] = np.radians(df_counties['LONGITUDE'])

dist = DistanceMetric.get_metric('haversine')

# df_counties[['lat','lon']].to_numpy()
# dist.pairwise(df_counties[['lat','lon']].to_numpy())*6373

df_dist = pd.DataFrame(dist.pairwise(df_counties[['lat','lon']].to_numpy())*6373,  columns=df_counties.COUNAME.unique(), index=df_counties.COUNAME.unique())

df_dist.head()
C:\ProgramData\Anaconda3\lib\site-packages\sklearn\neighbors\_distance_metric.py:14: FutureWarning: sklearn.neighbors.DistanceMetric has been moved to sklearn.metrics.DistanceMetric in 1.0. This import path will be removed in 1.3
  category=FutureWarning,
Out[3]:
Anderson Bedford Benton Bledsoe Blount Bradley Campbell Cannon Carroll Carter ... Unicoi Union Van Buren Warren Washington Wayne Weakley White Williamson Wilson
Anderson 0.000000 215.746842 351.601970 105.341524 40.907173 118.833258 33.730957 172.640463 385.420630 179.262267 ... 160.491703 37.343545 122.092954 151.823154 158.955412 338.923441 413.141758 118.896313 240.774512 200.810609
Bedford 215.746842 0.000000 159.238387 115.071436 226.829526 150.620873 231.748833 48.307818 189.026128 394.209704 ... 373.842595 252.276299 94.018560 63.938331 374.195906 123.460888 225.388816 100.245785 56.260865 76.702279
Benton 351.601970 159.238387 0.000000 265.284335 372.502239 308.871252 357.337630 184.948544 34.145375 529.045519 ... 511.787246 384.297840 240.813720 211.022452 508.420444 94.373749 66.151652 234.484570 113.319050 151.649630
Bledsoe 105.341524 115.071436 265.284335 0.000000 111.799532 59.202613 128.419111 80.471436 297.661216 280.702964 ... 259.596122 142.683444 25.065789 54.346880 261.044674 238.158660 330.264030 44.107974 152.026002 126.535969
Blount 40.907173 226.829526 372.502239 111.799532 0.000000 103.400539 71.216744 188.717313 405.733235 171.192307 ... 148.887246 56.543014 133.813939 164.590793 152.411849 349.943329 435.942553 138.228906 259.653596 224.838724

5 rows × 95 columns

Adjacency Data

In [4]:
df_county_adj = pd.read_csv("https://www2.census.gov/geo/docs/reference/county_adjacency.txt", sep='\t',encoding = 'latin1',
                           header = None, names = ['County', 'County GEOID', 'Neighbor', 'Neighbor GEOID'], \
                           dtype = {'County GEOID': 'category', 'Neighbor GEOID': 'category'})

#Filling values down as they appear in an aggregate view
df_county_adj.ffill(inplace = True) 

#Filtering to just TN
df_copy = copy.deepcopy(df_county_adj[df_county_adj['County'].str.contains("TN")])
df_TN_adj = copy.deepcopy(df_copy[df_copy['Neighbor'].str.contains("TN")])

#Removing extra text from county names
df_TN_adj['County'] = df_TN_adj['County'].str.replace(" County, TN", "")
df_TN_adj['Neighbor'] = df_TN_adj['Neighbor'].str.replace(" County, TN", "")

#Creating an adjacency matrix
df_TN_adj['Values'] = 1
df_TN_adj = df_TN_adj.pivot(index = 'County', columns = 'Neighbor', values = 'Values')
df_TN_adj.fillna(0, inplace = True)

df_TN_adj
Out[4]:
Neighbor Anderson Bedford Benton Bledsoe Blount Bradley Campbell Cannon Carroll Carter ... Unicoi Union Van Buren Warren Washington Wayne Weakley White Williamson Wilson
County
Anderson 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 ... 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Bedford 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Benton 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Bledsoe 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Blount 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
Wayne 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0
Weakley 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
White 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 1.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0
Williamson 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
Wilson 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0

95 rows × 95 columns

drop counties that will be auto assigned

In [5]:
df_TN_adj = df_TN_adj.drop(labels=['Shelby','Davidson'])
df_TN_adj = df_TN_adj.drop(labels=['Shelby','Davidson'],axis = 1)
df_dist = df_dist.drop(labels=['Shelby','Davidson'])
df_dist = df_dist.drop(labels=['Shelby','Davidson'],axis = 1)

df_TN_adj
Out[5]:
Neighbor Anderson Bedford Benton Bledsoe Blount Bradley Campbell Cannon Carroll Carter ... Unicoi Union Van Buren Warren Washington Wayne Weakley White Williamson Wilson
County
Anderson 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 ... 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Bedford 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Benton 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Bledsoe 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
Blount 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
Wayne 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0
Weakley 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
White 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 1.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0
Williamson 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
Wilson 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 ... 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0

93 rows × 93 columns

Demographics

In [6]:
df_race = pd.read_csv("DECENNIALPL2020.P2-2022-05-04T001048.csv")

#Removing white spaces from labels column & set as index
df_race['Label (Grouping)'] = df_race['Label (Grouping)'].str.strip()
df_race.set_index('Label (Grouping)', inplace = True)

#Removing all other races and leaving total, white alone, and other
df_race = df_race.filter(items = ['Total:', 'White alone'], axis = 0)
df_race.loc['Other Races'] = df_race.loc['Total:'].str.replace(",","").astype(int) - df_race.loc['White alone'].str.replace(",","").astype(int)
#Removing extra text from column names
race_cols = df_race.columns.values
new_race_cols = []
for i in range(0,len(race_cols)):
    new_race_cols.append(race_cols[i].replace(" County, Tennessee", ""))
df_race.columns = new_race_cols

# df_race
df_race
Out[6]:
Anderson Bedford Benton Bledsoe Blount Bradley Campbell Cannon Carroll Carter ... Unicoi Union Van Buren Warren Washington Wayne Weakley White Williamson Wilson
Label (Grouping)
Total: 77,123 50,237 15,864 14,913 135,280 108,620 39,272 14,506 28,440 56,356 ... 17,928 19,802 6,168 40,953 133,001 16,232 32,902 27,351 247,726 147,737
White alone 66,044 36,499 14,378 13,129 117,952 87,830 37,101 13,064 23,744 51,790 ... 16,175 18,642 5,866 33,980 112,606 14,503 27,813 24,833 200,408 118,889
Other Races 11079 13738 1486 1784 17328 20790 2171 1442 4696 4566 ... 1753 1160 302 6973 20395 1729 5089 2518 47318 28848

3 rows × 95 columns

Data Prep

i. Find the total population and "ideal" characteristics of a district

In [7]:
total_pop = df_counties.sum()['POPULATION']
df_counties['Prop'] = df_counties['POPULATION']/total_pop
ideal_pop = total_pop/9
ideal_prop = (total_pop/9)/total_pop

print(f'ideal population = {ideal_pop} ({round(ideal_prop,3)*100}%) ' )
# df_counties.sort_values('Prop',ascending=False)

# give one rep to Shelby and Davidson Counties
# filter our large counties
df_counties = df_counties[df_counties['Prop'] < .10]
df_counties.sort_values('Prop',ascending=False)
ideal population = 767871.1111111111 (11.1%) 
Out[7]:
STATEFP COUNTYFP COUNAME STNAME POPULATION LATITUDE LONGITUDE lat lon Prop
46 47 93 Knox Tennessee 478971 35.971886 -83.991621 0.627828 -1.465930 0.069307
32 47 65 Hamilton Tennessee 366207 35.093764 -85.201806 0.612502 -1.487052 0.052990
74 47 149 Rutherford Tennessee 341486 35.888267 -86.450266 0.626368 -1.508842 0.049413
93 47 187 Williamson Tennessee 247726 35.912269 -86.846241 0.626787 -1.515753 0.035846
62 47 125 Montgomery Tennessee 220069 36.557126 -87.345978 0.638042 -1.524475 0.031844
... ... ... ... ... ... ... ... ... ... ...
47 47 95 Lake Tennessee 7005 36.356402 -89.467221 0.634539 -1.561498 0.001014
33 47 67 Hancock Tennessee 6662 36.517225 -83.231102 0.637346 -1.452657 0.000964
63 47 127 Moore Tennessee 6461 35.296438 -86.340442 0.616039 -1.506925 0.000935
87 47 175 Van Buren Tennessee 6168 35.711516 -85.454611 0.623284 -1.491464 0.000893
68 47 137 Pickett Tennessee 5001 36.565622 -85.141318 0.638190 -1.485996 0.000724

93 rows × 10 columns

In [8]:
df_counties = df_counties.sort_values('COUNAME').reset_index().drop(columns=['index'])
In [9]:
# find upper and lower bounds
lower_lim = .9 * ideal_pop - (ideal_pop*.9 % 1000)
upper_lim = 1.1 * ideal_pop - (ideal_pop*1.1 % 1000)
ideal = 1.0 * ideal_pop - (ideal_pop*1.0 % 1000)

print(lower_lim)
print(upper_lim)
691000.0
844000.0
In [10]:
n_counties = len(df_counties)
n_districts = 7

var_combs = []
for i in df_counties['COUNAME']:
    for j in range (1, n_districts+1):
        var_combs.append(str(i)+"_"+str(j))

# county_pop = makeDict([df_counties['COUNAME']],df_counties['POPULATION'] )
# population array
county_pop = np.array(df_counties['POPULATION'])
c_range = range(0,n_counties)
d_range = range(0,n_districts)
In [11]:
# adjacency
adj_mat = np.array(df_TN_adj)
# distanct
dist_mat = np.array(df_dist)

adjacencies = np.sum(adj_mat,axis = 1)

print(max(adjacencies))
print(min(adjacencies))
9.0
3.0
In [12]:
model = LpProblem("TN",LpMinimize)

var_combs.sort()
DV_y = LpVariable.matrix("Y", var_combs, cat="Binary")
assignment = np.array(DV_y).reshape(n_counties,n_districts)

Objective Function:

Minimize the difference between the average district population and the lower bound of the ideal population. (Hopefully by constraining districts to be >= the lower bound this will work)

In [13]:
# model += lpSum(assignment[i][0]*county_pop[i] for i in c_range)*(1/7) + \
#     lpSum(assignment[i][1]*county_pop[i] for i in c_range)*(1/7) + \
#         lpSum(assignment[i][2]*county_pop[i] for i in c_range)*(1/7) + \
#             lpSum(assignment[i][3]*county_pop[i] for i in c_range)*(1/7) + \
#                 lpSum(assignment[i][4]*county_pop[i] for i in c_range)*(1/7) + \
#                     lpSum(assignment[i][5]*county_pop[i] for i in c_range)*(1/7) + \
#                         lpSum(assignment[i][6]*county_pop[i] for i in c_range)*(1/7) \
#                             - lower_lim
In [14]:
model += (lpSum(assignment[i][0]*county_pop[i] for i in c_range) - lower_lim)*(1/7) + \
    (lpSum(assignment[i][1]*county_pop[i] for i in c_range)- lower_lim)*(1/7) + \
        (lpSum(assignment[i][2]*county_pop[i] for i in c_range)- lower_lim)*(1/7) + \
            (lpSum(assignment[i][3]*county_pop[i] for i in c_range)- lower_lim)*(1/7) + \
                (lpSum(assignment[i][4]*county_pop[i] for i in c_range)- lower_lim)*(1/7) + \
                    (lpSum(assignment[i][5]*county_pop[i] for i in c_range)- lower_lim)*(1/7) + \
                        (lpSum(assignment[i][6]*county_pop[i] for i in c_range)- lower_lim)*(1/7) 

Minimize

$$\sum^7_{j=1}\sum^{95}_{i=1}\,county\_pop_{ij} - lower\_lim$$

Constraints

1. One district per county

In [15]:
for i in c_range:
    model += lpSum(assignment[i][j] for j in d_range) == 1

2. Adjacency (starting with must have at least 1 adjacency)

In [16]:
for i in range(3,10):
    print(f'{i}:{np.count_nonzero(adjacencies==i)}')
3:2
4:7
5:23
6:32
7:19
8:7
9:3
In [17]:
for j in d_range:
    for i in c_range:
        if adjacencies[i] == 3:
            model += 3*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)
        elif adjacencies[i] ==4:
            model += 3*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)
        elif adjacencies[i] ==5:
            model += 2*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)
        elif adjacencies[i] ==6:
            model += 2*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)
        elif adjacencies[i] ==7:
            model += 2*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)
        else:
            model += 3*assignment[i][j] <= lpSum(adj_mat[i][k]*assignment[k][j] for k in c_range)

3. Population

In [18]:
for j in d_range:
    model += lpSum(county_pop[i]*assignment[i][j] for i in c_range) >= lower_lim

for j in d_range:
    model += lpSum(county_pop[i]*assignment[i][j] for i in c_range) <= upper_lim

4. Distance

In [19]:
max_dist = math.ceil(np.max(dist_mat)*.75)
print(f"maximum allowable distance between counties = {max_dist}km")
for j in d_range:
    for i in c_range:
        for k in c_range:
            model += lpSum(max_dist*assignment[k][j]) >= \
                lpSum(dist_mat[i][k]*assignment[i][j] + dist_mat[i][k]*(assignment[k][j]-1))
maximum allowable distance between counties = 540km

if assignment k_j = 1

540 >= dist i_k * assignment i_j + dist i_k * 0

    if assignment i_j = 1

        360 >= dist_i_k*1 + 0
        ok

    if assignment i_j = 0
        360 >= 0 + 0
        ok

if assignment k_j = 0

0 >= dist i_k * assignment i_j + dist i_k * -1

    if assignment i_j = 1

        0 >= dist_i_k*1 + dist_i_k*-1
        ok

    if assignment i_j = 0:

        0 >= 0 + -1
        ok
In [20]:
model.solve()
LpStatus[model.status]
Out[20]:
'Optimal'
In [21]:
assigned_districts = pd.DataFrame()
for v in model.variables():
    if v.varValue == 1:
        if "Van_Buren" in v.name:
            df = pd.DataFrame({
                'var':[v.name],
                'county':["VanBuren"],
                'district':[v.name.split('_')[3]],
            })
        else:
            df = pd.DataFrame({
                'var':[v.name],
                'county':[v.name.split('_')[1]],
                'district':[v.name.split('_')[2]],
            })

        assigned_districts = assigned_districts.append(df)

Map

In [22]:
df_fips = pd.read_csv("tn_fips.csv")
In [23]:
from urllib.request import urlopen
import json
import random
import plotly.express as px

with urlopen("https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json") as response:
 counties = json.load(response)

target_states = ['47']
counties['features'] = [f for f in counties['features'] if f['properties']['STATE'] in target_states]
In [24]:
df_fips['County'] = df_fips['County'].str.replace(" ","")

df_fips = df_fips.merge(
    assigned_districts,
    left_on = 'County',
    right_on = 'county',
    how = 'left'
)

print(df_fips[df_fips['district'].isna()])
      County   code  var county district
18  Davidson  47037  NaN    NaN      NaN
78    Shelby  47157  NaN    NaN      NaN
In [25]:
# df_fips.loc[df_fips['County'].str.contains("Van"),'district'] = '2'
df_fips.loc[df_fips['County']=='Davidson','district'] = '8'
df_fips.loc[df_fips['County']=='Shelby','district'] = '9'

rank_fig = px.choropleth(df_fips, geojson=counties, locations="code", color="district",
 scope="usa",
 hover_data=["County","district"]
 )
rank_fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
rank_fig.show()
In [26]:
assignment
Out[26]:
array([[Y_Anderson_1, Y_Anderson_2, Y_Anderson_3, Y_Anderson_4,
        Y_Anderson_5, Y_Anderson_6, Y_Anderson_7],
       [Y_Bedford_1, Y_Bedford_2, Y_Bedford_3, Y_Bedford_4, Y_Bedford_5,
        Y_Bedford_6, Y_Bedford_7],
       [Y_Benton_1, Y_Benton_2, Y_Benton_3, Y_Benton_4, Y_Benton_5,
        Y_Benton_6, Y_Benton_7],
       [Y_Bledsoe_1, Y_Bledsoe_2, Y_Bledsoe_3, Y_Bledsoe_4, Y_Bledsoe_5,
        Y_Bledsoe_6, Y_Bledsoe_7],
       [Y_Blount_1, Y_Blount_2, Y_Blount_3, Y_Blount_4, Y_Blount_5,
        Y_Blount_6, Y_Blount_7],
       [Y_Bradley_1, Y_Bradley_2, Y_Bradley_3, Y_Bradley_4, Y_Bradley_5,
        Y_Bradley_6, Y_Bradley_7],
       [Y_Campbell_1, Y_Campbell_2, Y_Campbell_3, Y_Campbell_4,
        Y_Campbell_5, Y_Campbell_6, Y_Campbell_7],
       [Y_Cannon_1, Y_Cannon_2, Y_Cannon_3, Y_Cannon_4, Y_Cannon_5,
        Y_Cannon_6, Y_Cannon_7],
       [Y_Carroll_1, Y_Carroll_2, Y_Carroll_3, Y_Carroll_4, Y_Carroll_5,
        Y_Carroll_6, Y_Carroll_7],
       [Y_Carter_1, Y_Carter_2, Y_Carter_3, Y_Carter_4, Y_Carter_5,
        Y_Carter_6, Y_Carter_7],
       [Y_Cheatham_1, Y_Cheatham_2, Y_Cheatham_3, Y_Cheatham_4,
        Y_Cheatham_5, Y_Cheatham_6, Y_Cheatham_7],
       [Y_Chester_1, Y_Chester_2, Y_Chester_3, Y_Chester_4, Y_Chester_5,
        Y_Chester_6, Y_Chester_7],
       [Y_Claiborne_1, Y_Claiborne_2, Y_Claiborne_3, Y_Claiborne_4,
        Y_Claiborne_5, Y_Claiborne_6, Y_Claiborne_7],
       [Y_Clay_1, Y_Clay_2, Y_Clay_3, Y_Clay_4, Y_Clay_5, Y_Clay_6,
        Y_Clay_7],
       [Y_Cocke_1, Y_Cocke_2, Y_Cocke_3, Y_Cocke_4, Y_Cocke_5, Y_Cocke_6,
        Y_Cocke_7],
       [Y_Coffee_1, Y_Coffee_2, Y_Coffee_3, Y_Coffee_4, Y_Coffee_5,
        Y_Coffee_6, Y_Coffee_7],
       [Y_Crockett_1, Y_Crockett_2, Y_Crockett_3, Y_Crockett_4,
        Y_Crockett_5, Y_Crockett_6, Y_Crockett_7],
       [Y_Cumberland_1, Y_Cumberland_2, Y_Cumberland_3, Y_Cumberland_4,
        Y_Cumberland_5, Y_Cumberland_6, Y_Cumberland_7],
       [Y_DeKalb_1, Y_DeKalb_2, Y_DeKalb_3, Y_DeKalb_4, Y_DeKalb_5,
        Y_DeKalb_6, Y_DeKalb_7],
       [Y_Decatur_1, Y_Decatur_2, Y_Decatur_3, Y_Decatur_4, Y_Decatur_5,
        Y_Decatur_6, Y_Decatur_7],
       [Y_Dickson_1, Y_Dickson_2, Y_Dickson_3, Y_Dickson_4, Y_Dickson_5,
        Y_Dickson_6, Y_Dickson_7],
       [Y_Dyer_1, Y_Dyer_2, Y_Dyer_3, Y_Dyer_4, Y_Dyer_5, Y_Dyer_6,
        Y_Dyer_7],
       [Y_Fayette_1, Y_Fayette_2, Y_Fayette_3, Y_Fayette_4, Y_Fayette_5,
        Y_Fayette_6, Y_Fayette_7],
       [Y_Fentress_1, Y_Fentress_2, Y_Fentress_3, Y_Fentress_4,
        Y_Fentress_5, Y_Fentress_6, Y_Fentress_7],
       [Y_Franklin_1, Y_Franklin_2, Y_Franklin_3, Y_Franklin_4,
        Y_Franklin_5, Y_Franklin_6, Y_Franklin_7],
       [Y_Gibson_1, Y_Gibson_2, Y_Gibson_3, Y_Gibson_4, Y_Gibson_5,
        Y_Gibson_6, Y_Gibson_7],
       [Y_Giles_1, Y_Giles_2, Y_Giles_3, Y_Giles_4, Y_Giles_5, Y_Giles_6,
        Y_Giles_7],
       [Y_Grainger_1, Y_Grainger_2, Y_Grainger_3, Y_Grainger_4,
        Y_Grainger_5, Y_Grainger_6, Y_Grainger_7],
       [Y_Greene_1, Y_Greene_2, Y_Greene_3, Y_Greene_4, Y_Greene_5,
        Y_Greene_6, Y_Greene_7],
       [Y_Grundy_1, Y_Grundy_2, Y_Grundy_3, Y_Grundy_4, Y_Grundy_5,
        Y_Grundy_6, Y_Grundy_7],
       [Y_Hamblen_1, Y_Hamblen_2, Y_Hamblen_3, Y_Hamblen_4, Y_Hamblen_5,
        Y_Hamblen_6, Y_Hamblen_7],
       [Y_Hamilton_1, Y_Hamilton_2, Y_Hamilton_3, Y_Hamilton_4,
        Y_Hamilton_5, Y_Hamilton_6, Y_Hamilton_7],
       [Y_Hancock_1, Y_Hancock_2, Y_Hancock_3, Y_Hancock_4, Y_Hancock_5,
        Y_Hancock_6, Y_Hancock_7],
       [Y_Hardeman_1, Y_Hardeman_2, Y_Hardeman_3, Y_Hardeman_4,
        Y_Hardeman_5, Y_Hardeman_6, Y_Hardeman_7],
       [Y_Hardin_1, Y_Hardin_2, Y_Hardin_3, Y_Hardin_4, Y_Hardin_5,
        Y_Hardin_6, Y_Hardin_7],
       [Y_Hawkins_1, Y_Hawkins_2, Y_Hawkins_3, Y_Hawkins_4, Y_Hawkins_5,
        Y_Hawkins_6, Y_Hawkins_7],
       [Y_Haywood_1, Y_Haywood_2, Y_Haywood_3, Y_Haywood_4, Y_Haywood_5,
        Y_Haywood_6, Y_Haywood_7],
       [Y_Henderson_1, Y_Henderson_2, Y_Henderson_3, Y_Henderson_4,
        Y_Henderson_5, Y_Henderson_6, Y_Henderson_7],
       [Y_Henry_1, Y_Henry_2, Y_Henry_3, Y_Henry_4, Y_Henry_5, Y_Henry_6,
        Y_Henry_7],
       [Y_Hickman_1, Y_Hickman_2, Y_Hickman_3, Y_Hickman_4, Y_Hickman_5,
        Y_Hickman_6, Y_Hickman_7],
       [Y_Houston_1, Y_Houston_2, Y_Houston_3, Y_Houston_4, Y_Houston_5,
        Y_Houston_6, Y_Houston_7],
       [Y_Humphreys_1, Y_Humphreys_2, Y_Humphreys_3, Y_Humphreys_4,
        Y_Humphreys_5, Y_Humphreys_6, Y_Humphreys_7],
       [Y_Jackson_1, Y_Jackson_2, Y_Jackson_3, Y_Jackson_4, Y_Jackson_5,
        Y_Jackson_6, Y_Jackson_7],
       [Y_Jefferson_1, Y_Jefferson_2, Y_Jefferson_3, Y_Jefferson_4,
        Y_Jefferson_5, Y_Jefferson_6, Y_Jefferson_7],
       [Y_Johnson_1, Y_Johnson_2, Y_Johnson_3, Y_Johnson_4, Y_Johnson_5,
        Y_Johnson_6, Y_Johnson_7],
       [Y_Knox_1, Y_Knox_2, Y_Knox_3, Y_Knox_4, Y_Knox_5, Y_Knox_6,
        Y_Knox_7],
       [Y_Lake_1, Y_Lake_2, Y_Lake_3, Y_Lake_4, Y_Lake_5, Y_Lake_6,
        Y_Lake_7],
       [Y_Lauderdale_1, Y_Lauderdale_2, Y_Lauderdale_3, Y_Lauderdale_4,
        Y_Lauderdale_5, Y_Lauderdale_6, Y_Lauderdale_7],
       [Y_Lawrence_1, Y_Lawrence_2, Y_Lawrence_3, Y_Lawrence_4,
        Y_Lawrence_5, Y_Lawrence_6, Y_Lawrence_7],
       [Y_Lewis_1, Y_Lewis_2, Y_Lewis_3, Y_Lewis_4, Y_Lewis_5, Y_Lewis_6,
        Y_Lewis_7],
       [Y_Lincoln_1, Y_Lincoln_2, Y_Lincoln_3, Y_Lincoln_4, Y_Lincoln_5,
        Y_Lincoln_6, Y_Lincoln_7],
       [Y_Loudon_1, Y_Loudon_2, Y_Loudon_3, Y_Loudon_4, Y_Loudon_5,
        Y_Loudon_6, Y_Loudon_7],
       [Y_Macon_1, Y_Macon_2, Y_Macon_3, Y_Macon_4, Y_Macon_5, Y_Macon_6,
        Y_Macon_7],
       [Y_Madison_1, Y_Madison_2, Y_Madison_3, Y_Madison_4, Y_Madison_5,
        Y_Madison_6, Y_Madison_7],
       [Y_Marion_1, Y_Marion_2, Y_Marion_3, Y_Marion_4, Y_Marion_5,
        Y_Marion_6, Y_Marion_7],
       [Y_Marshall_1, Y_Marshall_2, Y_Marshall_3, Y_Marshall_4,
        Y_Marshall_5, Y_Marshall_6, Y_Marshall_7],
       [Y_Maury_1, Y_Maury_2, Y_Maury_3, Y_Maury_4, Y_Maury_5, Y_Maury_6,
        Y_Maury_7],
       [Y_McMinn_1, Y_McMinn_2, Y_McMinn_3, Y_McMinn_4, Y_McMinn_5,
        Y_McMinn_6, Y_McMinn_7],
       [Y_McNairy_1, Y_McNairy_2, Y_McNairy_3, Y_McNairy_4, Y_McNairy_5,
        Y_McNairy_6, Y_McNairy_7],
       [Y_Meigs_1, Y_Meigs_2, Y_Meigs_3, Y_Meigs_4, Y_Meigs_5, Y_Meigs_6,
        Y_Meigs_7],
       [Y_Monroe_1, Y_Monroe_2, Y_Monroe_3, Y_Monroe_4, Y_Monroe_5,
        Y_Monroe_6, Y_Monroe_7],
       [Y_Montgomery_1, Y_Montgomery_2, Y_Montgomery_3, Y_Montgomery_4,
        Y_Montgomery_5, Y_Montgomery_6, Y_Montgomery_7],
       [Y_Moore_1, Y_Moore_2, Y_Moore_3, Y_Moore_4, Y_Moore_5, Y_Moore_6,
        Y_Moore_7],
       [Y_Morgan_1, Y_Morgan_2, Y_Morgan_3, Y_Morgan_4, Y_Morgan_5,
        Y_Morgan_6, Y_Morgan_7],
       [Y_Obion_1, Y_Obion_2, Y_Obion_3, Y_Obion_4, Y_Obion_5, Y_Obion_6,
        Y_Obion_7],
       [Y_Overton_1, Y_Overton_2, Y_Overton_3, Y_Overton_4, Y_Overton_5,
        Y_Overton_6, Y_Overton_7],
       [Y_Perry_1, Y_Perry_2, Y_Perry_3, Y_Perry_4, Y_Perry_5, Y_Perry_6,
        Y_Perry_7],
       [Y_Pickett_1, Y_Pickett_2, Y_Pickett_3, Y_Pickett_4, Y_Pickett_5,
        Y_Pickett_6, Y_Pickett_7],
       [Y_Polk_1, Y_Polk_2, Y_Polk_3, Y_Polk_4, Y_Polk_5, Y_Polk_6,
        Y_Polk_7],
       [Y_Putnam_1, Y_Putnam_2, Y_Putnam_3, Y_Putnam_4, Y_Putnam_5,
        Y_Putnam_6, Y_Putnam_7],
       [Y_Rhea_1, Y_Rhea_2, Y_Rhea_3, Y_Rhea_4, Y_Rhea_5, Y_Rhea_6,
        Y_Rhea_7],
       [Y_Roane_1, Y_Roane_2, Y_Roane_3, Y_Roane_4, Y_Roane_5, Y_Roane_6,
        Y_Roane_7],
       [Y_Robertson_1, Y_Robertson_2, Y_Robertson_3, Y_Robertson_4,
        Y_Robertson_5, Y_Robertson_6, Y_Robertson_7],
       [Y_Rutherford_1, Y_Rutherford_2, Y_Rutherford_3, Y_Rutherford_4,
        Y_Rutherford_5, Y_Rutherford_6, Y_Rutherford_7],
       [Y_Scott_1, Y_Scott_2, Y_Scott_3, Y_Scott_4, Y_Scott_5, Y_Scott_6,
        Y_Scott_7],
       [Y_Sequatchie_1, Y_Sequatchie_2, Y_Sequatchie_3, Y_Sequatchie_4,
        Y_Sequatchie_5, Y_Sequatchie_6, Y_Sequatchie_7],
       [Y_Sevier_1, Y_Sevier_2, Y_Sevier_3, Y_Sevier_4, Y_Sevier_5,
        Y_Sevier_6, Y_Sevier_7],
       [Y_Smith_1, Y_Smith_2, Y_Smith_3, Y_Smith_4, Y_Smith_5, Y_Smith_6,
        Y_Smith_7],
       [Y_Stewart_1, Y_Stewart_2, Y_Stewart_3, Y_Stewart_4, Y_Stewart_5,
        Y_Stewart_6, Y_Stewart_7],
       [Y_Sullivan_1, Y_Sullivan_2, Y_Sullivan_3, Y_Sullivan_4,
        Y_Sullivan_5, Y_Sullivan_6, Y_Sullivan_7],
       [Y_Sumner_1, Y_Sumner_2, Y_Sumner_3, Y_Sumner_4, Y_Sumner_5,
        Y_Sumner_6, Y_Sumner_7],
       [Y_Tipton_1, Y_Tipton_2, Y_Tipton_3, Y_Tipton_4, Y_Tipton_5,
        Y_Tipton_6, Y_Tipton_7],
       [Y_Trousdale_1, Y_Trousdale_2, Y_Trousdale_3, Y_Trousdale_4,
        Y_Trousdale_5, Y_Trousdale_6, Y_Trousdale_7],
       [Y_Unicoi_1, Y_Unicoi_2, Y_Unicoi_3, Y_Unicoi_4, Y_Unicoi_5,
        Y_Unicoi_6, Y_Unicoi_7],
       [Y_Union_1, Y_Union_2, Y_Union_3, Y_Union_4, Y_Union_5, Y_Union_6,
        Y_Union_7],
       [Y_Van_Buren_1, Y_Van_Buren_2, Y_Van_Buren_3, Y_Van_Buren_4,
        Y_Van_Buren_5, Y_Van_Buren_6, Y_Van_Buren_7],
       [Y_Warren_1, Y_Warren_2, Y_Warren_3, Y_Warren_4, Y_Warren_5,
        Y_Warren_6, Y_Warren_7],
       [Y_Washington_1, Y_Washington_2, Y_Washington_3, Y_Washington_4,
        Y_Washington_5, Y_Washington_6, Y_Washington_7],
       [Y_Wayne_1, Y_Wayne_2, Y_Wayne_3, Y_Wayne_4, Y_Wayne_5, Y_Wayne_6,
        Y_Wayne_7],
       [Y_Weakley_1, Y_Weakley_2, Y_Weakley_3, Y_Weakley_4, Y_Weakley_5,
        Y_Weakley_6, Y_Weakley_7],
       [Y_White_1, Y_White_2, Y_White_3, Y_White_4, Y_White_5, Y_White_6,
        Y_White_7],
       [Y_Williamson_1, Y_Williamson_2, Y_Williamson_3, Y_Williamson_4,
        Y_Williamson_5, Y_Williamson_6, Y_Williamson_7],
       [Y_Wilson_1, Y_Wilson_2, Y_Wilson_3, Y_Wilson_4, Y_Wilson_5,
        Y_Wilson_6, Y_Wilson_7]], dtype=object)
In [ ]: